From fe9e657ab67857983e92da8b35bbee85dad2d632 Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Wed, 4 Feb 2009 15:08:46 +0000
Subject: [PATCH] x86: Clean up PV guest LDT handling.

1. Do not touch deferred_ops in invalidate_shadow_ldt(), as we may not
   always be in a context where deferred_ops is valid.
2. Protect the shadow LDT with a lock, now that mmu updates are not
   protected by the per-domain lock.

Signed-off-by: Keir Fraser
---
 xen/arch/x86/domain.c        |  2 ++
 xen/arch/x86/mm.c            | 40 +++++++++++++++++++++---------------
 xen/include/asm-x86/domain.h |  1 +
 3 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 3a77443be8..ceda727701 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -352,6 +352,8 @@ int vcpu_initialise(struct vcpu *v)
     v->arch.perdomain_ptes =
         d->arch.mm_perdomain_pt + (v->vcpu_id << GDT_LDT_VCPU_SHIFT);
 
+    spin_lock_init(&v->arch.shadow_ldt_lock);
+
     return (is_pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0);
 }
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 6e543b600f..9e288e44e7 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -179,12 +179,6 @@ l2_pgentry_t *compat_idle_pg_table_l2 = NULL;
 #define l3_disallow_mask(d) L3_DISALLOW_MASK
 #endif
 
-static void queue_deferred_ops(struct domain *d, unsigned int ops)
-{
-    ASSERT(d == current->domain);
-    this_cpu(percpu_mm_info).deferred_ops |= ops;
-}
-
 void __init init_frametable(void)
 {
     unsigned long nr_pages, page_step, i, mfn;
@@ -464,14 +458,18 @@ void update_cr3(struct vcpu *v)
 }
 
 
-static void invalidate_shadow_ldt(struct vcpu *v)
+static void invalidate_shadow_ldt(struct vcpu *v, int flush)
 {
     int i;
     unsigned long pfn;
     struct page_info *page;
 
+    BUG_ON(unlikely(in_irq()));
+
+    spin_lock(&v->arch.shadow_ldt_lock);
+
     if ( v->arch.shadow_ldt_mapcnt == 0 )
-        return;
+        goto out;
 
     v->arch.shadow_ldt_mapcnt = 0;
 
@@ -486,11 +484,12 @@ static void invalidate_shadow_ldt(struct vcpu *v)
         put_page_and_type(page);
     }
 
-    /* Dispose of the (now possibly invalid) mappings from the TLB. */
-    if ( v == current )
-        queue_deferred_ops(v->domain, DOP_FLUSH_TLB | DOP_RELOAD_LDT);
-    else
-        flush_tlb_mask(v->domain->domain_dirty_cpumask);
+    /* Rid TLBs of stale mappings (guest mappings and shadow mappings). */
+    if ( flush )
+        flush_tlb_mask(v->vcpu_dirty_cpumask);
+
+ out:
+    spin_unlock(&v->arch.shadow_ldt_lock);
 }
 
 
@@ -541,8 +540,10 @@ int map_ldt_shadow_page(unsigned int off)
 
     nl1e = l1e_from_pfn(mfn, l1e_get_flags(l1e) | _PAGE_RW);
 
+    spin_lock(&v->arch.shadow_ldt_lock);
     l1e_write(&v->arch.perdomain_ptes[off + 16], nl1e);
     v->arch.shadow_ldt_mapcnt++;
+    spin_unlock(&v->arch.shadow_ldt_lock);
 
     return 1;
 }
@@ -989,7 +990,7 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
              (d == e) )
         {
             for_each_vcpu ( d, v )
-                invalidate_shadow_ldt(v);
+                invalidate_shadow_ldt(v, 1);
         }
         put_page(page);
     }
@@ -2375,7 +2376,7 @@ int new_guest_cr3(unsigned long mfn)
             return 0;
         }
 
-        invalidate_shadow_ldt(curr);
+        invalidate_shadow_ldt(curr, 0);
         write_ptbase(curr);
 
         return 1;
@@ -2390,7 +2391,7 @@ int new_guest_cr3(unsigned long mfn)
         return 0;
     }
 
-    invalidate_shadow_ldt(curr);
+    invalidate_shadow_ldt(curr, 0);
 
     old_base_mfn = pagetable_get_pfn(curr->arch.guest_table);
 
@@ -2427,6 +2428,10 @@ static void process_deferred_ops(void)
         flush_tlb_local();
     }
 
+    /*
+     * Do this after flushing TLBs, to ensure we see fresh LDT mappings
+     * via the linear pagetable mapping.
+     */
     if ( deferred_ops & DOP_RELOAD_LDT )
         (void)map_ldt_shadow_page(0);
 
@@ -2799,7 +2804,8 @@ int do_mmuext_op(
         else if ( (curr->arch.guest_context.ldt_ents != ents) ||
                   (curr->arch.guest_context.ldt_base != ptr) )
         {
-            invalidate_shadow_ldt(curr);
+            invalidate_shadow_ldt(curr, 0);
+            this_cpu(percpu_mm_info).deferred_ops |= DOP_FLUSH_TLB;
             curr->arch.guest_context.ldt_base = ptr;
             curr->arch.guest_context.ldt_ents = ents;
             load_LDT(curr);
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index c2aaff446d..f1ea4914a7 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -352,6 +352,7 @@ struct arch_vcpu
 
     /* Current LDT details. */
     unsigned long shadow_ldt_mapcnt;
+    spinlock_t shadow_ldt_lock;
 
    struct paging_vcpu paging;
-- 
2.30.2
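
For readers following the locking change: below is a minimal standalone sketch (plain C with pthreads, NOT Xen code; the names struct shadow_ldt, map_entry() and invalidate_all() are hypothetical) of the discipline the patch adopts. In the patch itself, Xen's spin_lock_init()/spin_lock()/spin_unlock() on v->arch.shadow_ldt_lock play this role, and BUG_ON(in_irq()) guards against taking the lock from interrupt context; here a mutex stands in for the spinlock. The point is simply that the map path and the teardown path serialize on the same per-vCPU lock, so the mapping count and the mapped slots cannot diverge once MMU updates are no longer covered by the per-domain lock.

/*
 * Standalone illustration (plain C + pthreads, not Xen code) of the
 * locking discipline introduced by the patch: map_entry() and
 * invalidate_all() both take the same per-"vcpu" lock, so the map
 * count and the mapping slots stay consistent under concurrency.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define SLOTS 16

struct shadow_ldt {
    pthread_mutex_t lock;      /* stands in for v->arch.shadow_ldt_lock */
    unsigned long   mapcnt;    /* stands in for v->arch.shadow_ldt_mapcnt */
    int             slot[SLOTS];
};

/* Map one slot; analogous to the locked tail of map_ldt_shadow_page(). */
static void map_entry(struct shadow_ldt *s, unsigned int off)
{
    pthread_mutex_lock(&s->lock);
    s->slot[off] = 1;
    s->mapcnt++;
    pthread_mutex_unlock(&s->lock);
}

/* Drop every mapping; analogous to invalidate_shadow_ldt(). */
static void invalidate_all(struct shadow_ldt *s)
{
    pthread_mutex_lock(&s->lock);
    if ( s->mapcnt == 0 )
        goto out;                     /* nothing mapped: cheap exit */
    s->mapcnt = 0;
    memset(s->slot, 0, sizeof(s->slot));
 out:
    pthread_mutex_unlock(&s->lock);   /* unlock on both paths */
}

int main(void)
{
    struct shadow_ldt s = { .lock = PTHREAD_MUTEX_INITIALIZER };

    map_entry(&s, 3);
    map_entry(&s, 7);
    printf("mapped: %lu\n", s.mapcnt);   /* 2 */
    invalidate_all(&s);
    printf("mapped: %lu\n", s.mapcnt);   /* 0 */
    return 0;
}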